[RISCV] Remove redundant test cases for index segment load (3/8).
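
Instead of materializing the tied destination operand with a preceding
unmasked intrinsic call, the masked index segment load tests now take
the merge value as a function argument, so each masked test reduces to
a single call to the masked intrinsic. As a representative example
(taken directly from this diff), the updated nxv1i8/nxv1i8 test now
reads:

  define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
  entry:
    %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
    %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
    ret <vscale x 1 x i8> %1
  }

The tests deleted outright are those whose index vector element count
does not match the result vector's (e.g. nxv16i16 results indexed by
nxv1i8, nxv2i32, or nxv64i8); they exercised no additional codegen
paths beyond the kept same-element-count variants.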

Differential Revision: https://reviews.llvm.org/D97022

GitOrigin-RevId: 8cc0b1cbea7d7ef89a950d709654555ec6949136
diff --git a/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll b/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll
index f21817b..16fc35c 100644
--- a/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll
+++ b/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll
@@ -18,58 +18,20 @@
   ret <vscale x 16 x i16> %1
 }
 
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv1i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
   ret <vscale x 16 x i16> %1
 }
 
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv1i8(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
 
@@ -86,432 +48,20 @@
   ret <vscale x 16 x i16> %1
 }
 
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv2i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
   ret <vscale x 16 x i16> %1
 }
 
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv2i32(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv4i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv4i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv32i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv32i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv1i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv1i32(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv8i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv8i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv8i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv8i8(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv8i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv8i32(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv64i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 64 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv64i8(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv4i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv4i8(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv1i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv1i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv32i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv32i8(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv2i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv2i8(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
 
@@ -528,126 +78,20 @@
   ret <vscale x 16 x i16> %1
 }
 
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
+; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv2i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
   ret <vscale x 16 x i16> %1
 }
 
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv2i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv4i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vluxseg2_nxv16i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.nxv16i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv4i32(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -664,160 +108,20 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -834,194 +138,20 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -1038,229 +168,20 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg2_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -1277,165 +198,22 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -1452,200 +230,22 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -1662,236 +262,22 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg3_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -1908,170 +294,23 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -2088,206 +327,23 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -2304,243 +360,23 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg4_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg4.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -2557,175 +393,24 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -2742,212 +427,24 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -2964,250 +461,24 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg5_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg5.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -3224,180 +495,25 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -3414,218 +530,25 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -3642,257 +565,25 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg6_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg6.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -3909,11 +600,10 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -3921,173 +611,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -4104,11 +636,10 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -4116,212 +647,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -4338,11 +672,10 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -4350,252 +683,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg7_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg7.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -4612,51 +708,10 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -4665,137 +720,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -4812,51 +745,10 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -4865,177 +757,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -5052,211 +782,10 @@
   ret <vscale x 1 x i8> %1
 }
 
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vluxseg8_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -5265,15 +794,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
 }
 
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
@@ -5292,58 +819,20 @@
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv1i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv1i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
 
@@ -5360,432 +849,20 @@
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv2i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv2i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv4i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv4i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv32i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv32i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv1i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv1i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv8i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv8i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv8i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv8i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv64i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv4i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv1i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv32i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv2i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
 
@@ -5802,92 +879,20 @@
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v16, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv2i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg2_nxv16i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.nxv16i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv4i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
 
@@ -5904,60 +909,21 @@
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv1i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv1i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
 
@@ -5974,445 +940,22 @@
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv2i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv2i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv4i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv4i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv32i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv32i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv1i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv1i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv8i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv8i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv8i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv8i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv64i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv4i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv1i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv32i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv2i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
 
@@ -6429,95 +972,21 @@
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v16, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv2i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg3_nxv16i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.nxv16i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv4i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
 
@@ -6534,62 +1003,23 @@
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v18
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv1i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv1i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
 
@@ -6606,458 +1036,23 @@
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv2i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv2i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv4i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv4i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv32i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv32i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv1i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv1i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv8i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv8i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv8i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv8i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv8i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 8 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v12
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv8i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv64i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 64 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv64i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv4i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv4i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv1i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 1 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv1i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv32i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 32 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv32i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv2i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv2i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
 
@@ -7074,200 +1069,22 @@
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vluxseg4ei32.v v6, (a0), v16, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv2i16(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 2 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
   ret <vscale x 16 x i8> %1
 }
 
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv2i16(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv4i32(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 4 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i8> @test_vluxseg4_nxv16i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
-  ret <vscale x 16 x i8> %1
-}
-
-define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.nxv16i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv4i32(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
-  ret <vscale x 16 x i8> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -7284,364 +1101,20 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -7658,58 +1131,20 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -7726,163 +1161,20 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg2_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -7899,375 +1191,22 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -8284,60 +1223,22 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -8354,168 +1255,22 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg3_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -8532,386 +1287,23 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
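
Note: every hunk in this patch applies the same rewrite, shown here in consolidated form. The old masked tests first issued an unmasked vluxseg load solely to manufacture a merge value for the tied destination operands, and separate functions existed for every index type; the variants removed outright are the redundant ones per the commit title. The updated tests instead take the merge value as an explicit %val argument and call only the masked intrinsic. A minimal sketch of the resulting test shape, reusing the vluxseg4 declarations directly above (the @sketch_ name is illustrative, not part of the test file):

define <vscale x 2 x i32> @sketch_vluxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
entry:
  ; %val seeds all four fields of the segment tuple; the masked indexed load
  ; overwrites the active lanes, and field 1 of the result is returned.
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}

Passing %val as an argument keeps the merge operand live into the masked instruction without a preceding unmasked load, which is why the updated CHECK lines below begin with vmv1r.v copies of v8 rather than a first vluxseg, and why the index vector moves from v8 to v9.
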
 
@@ -8928,62 +1320,23 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -9000,173 +1353,23 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg4_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -9183,397 +1386,24 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -9590,64 +1420,24 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -9664,178 +1454,24 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg5_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg5.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -9852,408 +1488,25 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -10270,66 +1523,25 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -10346,183 +1558,25 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg6_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg6.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -10539,11 +1593,10 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -10551,407 +1604,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -10968,11 +1629,10 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -10980,56 +1640,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -11046,11 +1665,10 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -11058,176 +1676,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg7_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg7.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv16i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv16i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv1i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv1i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv16i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv16i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -11244,171 +1701,10 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv4i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv4i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv32i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv32i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv1i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv1i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv8i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -11417,257 +1713,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv8i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv8i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv8i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv8i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv64i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv64i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv4i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv32i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv32i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -11684,70 +1738,27 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -11764,51 +1775,10 @@
   ret <vscale x 2 x i32> %1
 }
 
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv4i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i32> @test_vluxseg8_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
-  ret <vscale x 2 x i32> %1
-}
-
-define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -11817,151 +1787,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.nxv2i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv4i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
-  ret <vscale x 2 x i32> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
 }
 
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
@@ -11980,228 +1812,20 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -12218,194 +1842,20 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg2_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -12422,164 +1872,20 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -12596,235 +1902,22 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -12841,200 +1934,22 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg3_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -13051,169 +1966,21 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -13230,242 +1997,23 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -13482,206 +2030,23 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg4_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -13698,174 +2063,23 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -13882,249 +2096,24 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -14141,212 +2130,24 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg5_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -14363,179 +2164,24 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -14552,256 +2198,25 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -14818,218 +2233,25 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg6_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -15046,184 +2268,25 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -15240,11 +2303,10 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -15252,251 +2314,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
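; Editorial sketch (my reading of the CHECK lines above; an assumption, not
; part of the patch): vluxseg7 defines seven consecutive vector registers, so
; the seven tied passthru operands must occupy one contiguous group (v1..v7
; here). The chain of vmv1r.v copies replicates the single %val argument,
; which arrives in v8 under the calling convention, into that group before
; the tail-undisturbed (tu) masked load overwrites it; the index vector is
; then read from v9, the first register left free after %val.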
 
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -15513,11 +2339,10 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -15525,212 +2350,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg7_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -15747,11 +2375,10 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -15759,177 +2386,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
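; Editorial sketch (an assumption about register allocation, not patch
; content): in this test the index is <vscale x 4 x i32>, i.e. LMUL=2 at
; e32, so it needs an even-aligned register pair. With %val in v8, the pair
; v8/v9 is unavailable and the index lands in v10, which is why the masked
; form above reads "(a0), v10" where the ei8/ei16 variants read "(a0), v9".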
 
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv1i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv1i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv2i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv2i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -15946,131 +2411,10 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv1i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv8i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -16079,137 +2423,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv8i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv8i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv8i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv8i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv8i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv64i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv64i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -16226,230 +2448,27 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv1i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv1i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv32i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv32i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv2i8(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv2i8(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv16i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv16i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv2i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i16> @test_vluxseg8_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
-  ret <vscale x 4 x i16> %1
-}
-
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv2i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -16466,62 +2485,25 @@
   ret <vscale x 4 x i16> %1
 }
 
-define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v19, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
-  ret <vscale x 4 x i16> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
 }
 
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
@@ -16540,160 +2522,20 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -16710,194 +2552,20 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -16914,229 +2582,20 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg2_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -17153,165 +2612,22 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -17328,200 +2644,22 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -17538,236 +2676,22 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg3_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -17784,170 +2708,23 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -17964,206 +2741,23 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -18180,243 +2774,23 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg4_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg4.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -18433,175 +2807,24 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -18618,212 +2841,24 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -18840,250 +2875,24 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg5_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg5.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -19100,180 +2909,25 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -19290,218 +2944,25 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -19518,257 +2979,25 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg6_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg6.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -19785,11 +3014,10 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -19797,173 +3025,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -19980,11 +3050,10 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -19992,212 +3061,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -20214,11 +3086,10 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -20226,252 +3097,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg7_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg7.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv16i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv16i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -20488,51 +3122,10 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv16i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -20541,137 +3134,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv16i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv2i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv2i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv4i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv4i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv32i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv32i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -20688,51 +3159,10 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv8i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -20741,177 +3171,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv8i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv8i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv8i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv8i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv8i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv64i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv64i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv4i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv4i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -20928,211 +3196,10 @@
   ret <vscale x 1 x i32> %1
 }
 
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv32i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv32i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv2i8(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv2i8(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv16i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv16i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv2i16(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv2i16(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv4i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i32> @test_vluxseg8_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
-  ret <vscale x 1 x i32> %1
-}
-
-define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -21141,253 +3208,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.nxv1i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
-  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv4i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
-  ret <vscale x 1 x i32> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv1i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv16i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv2i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv4i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv32i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv1i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
 }
 
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
@@ -21406,22 +3233,18 @@
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
 }
 
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
@@ -21440,22 +3263,18 @@
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
 }
 
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
@@ -21474,541 +3293,20 @@
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv64i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv4i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv1i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv32i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv2i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv16i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv2i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg2_nxv8i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.nxv8i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv1i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv16i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv2i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv4i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv32i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv1i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -22025,23 +3323,20 @@
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
 }
 
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
@@ -22060,23 +3355,20 @@
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
 }
 
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
@@ -22095,557 +3387,21 @@
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv64i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv4i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv1i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv32i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv2i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv16i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv2i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg3_nxv8i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.nxv8i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv16i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv1i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv1i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv16i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv16i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv2i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv2i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv4i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv4i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv32i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv32i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv1i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv1i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -22662,24 +3418,21 @@
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
 }
 
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
@@ -22698,24 +3451,21 @@
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
 }
 
 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
@@ -22734,552 +3484,23 @@
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv64i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vmv2r.v v18, v16
 ; CHECK-NEXT:    vmv2r.v v20, v16
 ; CHECK-NEXT:    vmv2r.v v22, v16
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v18
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv64i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv4i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
   ret <vscale x 8 x i16> %1
 }
 
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv4i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv1i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv1i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv32i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv2i8(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv2i8(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv16i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv2i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv4i32(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i16> @test_vluxseg4_nxv8i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
-  ret <vscale x 8 x i16> %1
-}
-
-define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.nxv8i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv4i32(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
-  ret <vscale x 8 x i16> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -23296,22 +3517,18 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
@@ -23330,22 +3547,18 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
@@ -23364,541 +3577,20 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg2_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -23915,23 +3607,19 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
@@ -23950,23 +3638,20 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
@@ -23985,557 +3670,21 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg3_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -24552,24 +3701,21 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
@@ -24588,24 +3734,21 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
@@ -24624,573 +3767,22 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg4ei32.v v7, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg4_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -25207,25 +3799,22 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
@@ -25244,25 +3833,22 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
@@ -25281,589 +3867,23 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg5ei32.v v7, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg5_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -25880,26 +3900,23 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
@@ -25918,26 +3935,23 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
@@ -25956,605 +3970,25 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg6_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -26571,11 +4005,10 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -26583,15 +4016,13 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
@@ -26610,11 +4041,10 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -26622,15 +4052,13 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
@@ -26649,11 +4077,10 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -26661,609 +4088,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg7_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv16i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv16i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv1i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv1i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv2i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv2i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv4i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv4i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv32i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -27280,28 +4113,25 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v19, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
@@ -27320,28 +4150,25 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
 }
 
 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
@@ -27360,51 +4187,10 @@
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv64i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vmv1r.v v17, v16
 ; CHECK-NEXT:    vmv1r.v v18, v16
 ; CHECK-NEXT:    vmv1r.v v19, v16
@@ -27413,535 +4199,15 @@
 ; CHECK-NEXT:    vmv1r.v v22, v16
 ; CHECK-NEXT:    vmv1r.v v23, v16
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv64i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
   ret <vscale x 8 x i8> %1
 }
 
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv4i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv1i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv1i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv2i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv2i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv2i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv2i16(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i8> @test_vluxseg8_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
-  ret <vscale x 8 x i8> %1
-}
-
-define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.nxv8i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
-  ret <vscale x 8 x i8> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv16i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv16i16(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv1i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv1i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv16i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv16i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv2i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv2i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv4i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv4i16(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv32i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv32i16(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv1i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv1i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -27958,22 +4224,18 @@
   ret <vscale x 8 x i32> %1
 }
 
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
 }
 
 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
@@ -27992,22 +4254,18 @@
   ret <vscale x 8 x i32> %1
 }
 
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
 }
 
 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
@@ -28026,432 +4284,20 @@
   ret <vscale x 8 x i32> %1
 }
 
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv64i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
   ret <vscale x 8 x i32> %1
 }
 
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv64i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv4i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv4i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv1i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv1i16(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv32i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv32i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv2i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv2i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv16i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv16i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv2i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv2i16(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv4i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vluxseg2_nxv8i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.nxv8i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv4i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -28468,228 +4314,20 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -28706,194 +4344,20 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg2_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -28910,164 +4374,20 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.nxv4i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -29084,235 +4404,22 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -29329,200 +4436,22 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg3_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -29539,169 +4468,21 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.nxv4i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -29718,242 +4499,23 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -29970,206 +4532,23 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg4_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -30186,174 +4565,23 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.nxv4i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -30370,249 +4598,24 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -30629,212 +4632,24 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg5_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -30851,179 +4666,24 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.nxv4i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -31040,256 +4700,25 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -31306,218 +4735,25 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg6_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -31534,184 +4770,25 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.nxv4i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -31728,11 +4805,10 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -31740,251 +4816,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -32001,11 +4841,10 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -32013,212 +4852,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg7_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -32235,11 +4877,10 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -32247,177 +4888,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.nxv4i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv16i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv16i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv1i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv1i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv16i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv16i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv2i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv2i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -32434,131 +4913,10 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv32i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -32567,137 +4925,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv64i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -32714,230 +4950,27 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv1i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv1i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv32i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv32i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv2i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv2i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv16i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv16i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv2i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vluxseg8_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
-  ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv2i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -32954,62 +4987,25 @@
   ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v19, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.nxv4i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
-  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
-  ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
 }
 
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
@@ -33028,160 +5024,20 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -33198,194 +5054,20 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -33402,229 +5084,20 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg2_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -33641,165 +5114,22 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -33816,200 +5146,22 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -34026,236 +5178,22 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg3_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -34272,170 +5210,23 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -34452,206 +5243,23 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -34668,243 +5276,23 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg4_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -34921,175 +5309,24 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -35106,212 +5343,24 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -35328,250 +5377,24 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg5_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -35588,180 +5411,25 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -35778,218 +5446,25 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -36006,257 +5481,25 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg6_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -36273,11 +5516,10 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -36285,173 +5527,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -36468,11 +5552,10 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -36480,212 +5563,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -36702,11 +5588,10 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -36714,252 +5599,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg7_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv16i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv16i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -36976,51 +5624,10 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -37029,137 +5636,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv2i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv2i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv4i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv4i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv32i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -37176,51 +5661,10 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv8i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -37229,177 +5673,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv8i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv8i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv8i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv8i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv8i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv64i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv64i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv4i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv4i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -37416,211 +5698,10 @@
   ret <vscale x 1 x i16> %1
 }
 
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv32i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv32i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv2i8(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv2i8(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv16i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv16i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv2i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv2i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i16> @test_vluxseg8_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
-  ret <vscale x 1 x i16> %1
-}
-
-define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -37629,185 +5710,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.nxv1i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
-  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
-  ret <vscale x 1 x i16> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv16i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i16>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv16i16(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv1i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i8>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv1i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv16i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i8>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv16i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv2i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i32>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv2i32(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv4i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i16>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv4i16(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
 }
 
 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
@@ -37826,262 +5735,20 @@
   ret <vscale x 32 x i8> %1
 }
 
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 32 x i1> %mask) {
+define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 32 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
+; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv1i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i32>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
   ret <vscale x 32 x i8> %1
 }
 
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv1i32(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv8i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i16>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv8i16(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv8i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i8>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv8i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv8i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 8 x i32>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv8i32(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv64i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 64 x i8>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv64i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv4i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i8>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv4i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv1i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 1 x i16>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv1i16(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
 
@@ -38098,262 +5765,20 @@
   ret <vscale x 32 x i8> %1
 }
 
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 32 x i1> %mask) {
+define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 32 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv2i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i8>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
   ret <vscale x 32 x i8> %1
 }
 
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv2i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv16i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 16 x i32>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv16i32(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv2i16(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 2 x i16>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv2i16(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv4i32(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 4 x i32>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i8> @test_vluxseg2_nxv32i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
-  ret <vscale x 32 x i8> %1
-}
-
-define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 32 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv32i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.nxv32i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv4i32(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 32 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
-  ret <vscale x 32 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -38370,364 +5795,20 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -38744,58 +5825,20 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -38812,163 +5855,20 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg2_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -38985,375 +5885,22 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -39370,60 +5917,22 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -39440,168 +5949,22 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg3_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -39618,386 +5981,23 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -40014,62 +6014,23 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -40086,173 +6047,23 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg4_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -40269,397 +6080,24 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -40676,64 +6114,24 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -40750,178 +6148,24 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg5_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg5.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -40938,408 +6182,25 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -41356,66 +6217,25 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -41432,183 +6252,25 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg6_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg6.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -41625,11 +6287,10 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -41637,407 +6298,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -42054,11 +6323,10 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -42066,56 +6334,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -42132,11 +6359,10 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -42144,176 +6370,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg7_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg7.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv1i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv16i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv16i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -42330,171 +6395,10 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv4i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv4i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv1i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv1i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv8i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -42503,257 +6407,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv8i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv8i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv8i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv8i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv8i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv64i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv64i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv4i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv4i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv1i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv1i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv32i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -42770,70 +6432,27 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -42850,51 +6469,10 @@
   ret <vscale x 2 x i8> %1
 }
 
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vluxseg8_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
-  ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -42903,117 +6481,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
-  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
-  ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
 }
 
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
@@ -43032,364 +6506,20 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -43406,58 +6536,20 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -43474,163 +6566,20 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg2_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -43647,375 +6596,22 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -44032,60 +6628,22 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -44102,168 +6660,22 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg3_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -44280,386 +6692,23 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -44676,62 +6725,23 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -44748,173 +6758,23 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg4_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -44931,397 +6791,24 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -45338,64 +6825,24 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -45412,178 +6859,24 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg5_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -45600,408 +6893,25 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -46018,66 +6928,25 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -46094,183 +6963,25 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg6_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -46287,11 +6998,10 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -46299,407 +7009,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -46716,11 +7034,10 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -46728,56 +7045,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -46794,11 +7070,10 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -46806,176 +7081,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg7_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv16i16(i16*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv16i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv16i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv1i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv1i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv16i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv16i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -46992,171 +7106,10 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv4i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv4i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv32i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv32i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -47165,257 +7118,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv8i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv8i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv64i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv64i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv32i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv32i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -47432,70 +7143,27 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv16i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv16i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -47512,51 +7180,10 @@
   ret <vscale x 2 x i16> %1
 }
 
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv4i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i16> @test_vluxseg8_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
-  ret <vscale x 2 x i16> %1
-}
-
-define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -47565,151 +7192,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.nxv2i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
-  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv4i32(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
-  ret <vscale x 2 x i16> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv16i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv16i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv1i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv16i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv2i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
 }
 
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
@@ -47728,228 +7217,20 @@
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv32i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv1i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv8i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv8i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv8i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv64i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv64i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -47966,194 +7247,20 @@
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv1i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv1i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv32i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv2i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv16i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv2i16(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg2_nxv4i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv2i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -48170,164 +7277,20 @@
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv16i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv16i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv1i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv16i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv2i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -48344,235 +7307,22 @@
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv32i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv1i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv8i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv8i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv8i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv64i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv64i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -48589,200 +7339,22 @@
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv1i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv1i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv32i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv2i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv16i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv2i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg3_nxv4i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv2i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -48799,169 +7371,22 @@
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv16i16(i32*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv16i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv16i16(i32* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv16i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv1i8(i32*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv1i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv1i8(i32* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv1i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv16i8(i32*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv16i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv16i8(i32* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv16i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv2i32(i32*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv2i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv2i32(i32* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv2i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -48978,242 +7403,23 @@
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv32i16(i32*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv32i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv32i16(i32* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv32i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv1i32(i32*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv1i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv1i32(i32* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv1i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv8i16(i32*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv8i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv8i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv8i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv8i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv8i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v12
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv8i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv64i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv64i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -49230,206 +7436,23 @@
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv1i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv1i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv32i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v12
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv32i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv2i8(i32*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv2i8(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv2i8(i32* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv2i8(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv16i32(i32*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv16i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv16i32(i32* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv16i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv2i16(i32*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv2i16(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i32> @test_vluxseg4_nxv4i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
-  ret <vscale x 4 x i32> %1
-}
-
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv2i16(i32* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv2i16(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
-}
-
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -49446,24 +7469,21 @@
   ret <vscale x 4 x i32> %1
 }
 
-define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
 }
 
 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
@@ -49482,58 +7502,20 @@
   ret <vscale x 16 x half> %1
 }
 
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv1i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
   ret <vscale x 16 x half> %1
 }
 
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv1i8(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
 
@@ -49550,432 +7532,20 @@
   ret <vscale x 16 x half> %1
 }
 
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv2i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
   ret <vscale x 16 x half> %1
 }
 
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv2i32(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv4i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv4i16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv32i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 32 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv32i16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv1i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv1i32(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv8i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv8i16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv8i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv8i8(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv8i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 8 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv8i32(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv64i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 64 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv64i8(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv4i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv4i8(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv1i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv1i16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv32i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 32 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv32i8(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv2i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv2i8(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
 
@@ -49992,228 +7562,20 @@
   ret <vscale x 16 x half> %1
 }
 
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
+; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv2i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 2 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
   ret <vscale x 16 x half> %1
 }
 
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv2i16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv4i32(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 4 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vluxseg2_nxv16f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
-  ret <vscale x 16 x half> %1
-}
-
-define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv16f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.nxv16f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv4i32(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv16i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv16i16(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv1i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv1i8(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv16i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv16i8(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv2i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv2i32(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -50230,228 +7592,20 @@
   ret <vscale x 4 x double> %1
 }
 
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv32i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
   ret <vscale x 4 x double> %1
 }
 
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv32i16(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv1i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv1i32(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv8i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv8i16(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv8i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv8i8(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv8i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv8i32(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv64i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv64i8(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -50468,194 +7622,20 @@
   ret <vscale x 4 x double> %1
 }
 
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv1i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
   ret <vscale x 4 x double> %1
 }
 
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv1i16(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv32i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv32i8(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv2i8(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv2i8(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv16i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv16i32(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv2i16(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x double> @test_vluxseg2_nxv4f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
-  ret <vscale x 4 x double> %1
-}
-
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv2i16(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -50672,56 +7652,18 @@
   ret <vscale x 4 x double> %1
 }
 
-define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
-  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
-  ret <vscale x 4 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
 }
 
 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
@@ -50740,160 +7682,20 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -50910,194 +7712,20 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -51114,229 +7742,20 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg2_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -51353,165 +7772,22 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -51528,200 +7804,22 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -51738,236 +7836,22 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg3_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -51984,170 +7868,23 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -52164,206 +7901,23 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -52380,243 +7934,23 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg4_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg4.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -52633,175 +7967,24 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -52818,212 +8001,24 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -53040,250 +8035,24 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg5_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg5.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -53300,180 +8069,25 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -53490,218 +8104,25 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -53718,257 +8139,25 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg6_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg6.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -53985,11 +8174,10 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -53997,173 +8185,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -54180,11 +8210,10 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -54192,212 +8221,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -54414,11 +8246,10 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -54426,252 +8257,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg7_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg7.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -54688,51 +8282,10 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv16i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -54741,137 +8294,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv16i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv2i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv2i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv4i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv4i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv32i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv32i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -54888,51 +8319,10 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv8i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -54941,177 +8331,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv8i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv8i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv8i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv8i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv8i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv64i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv64i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv4i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv4i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -55128,211 +8356,10 @@
   ret <vscale x 1 x double> %1
 }
 
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv32i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv32i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv2i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv2i8(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv16i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv16i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv2i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv2i16(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv4i32(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x double> @test_vluxseg8_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
-  ret <vscale x 1 x double> %1
-}
-
-define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -55341,117 +8368,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.nxv1f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
-  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv4i32(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
-  ret <vscale x 1 x double> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
 }
 
 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
@@ -55470,364 +8393,20 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -55844,58 +8423,20 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -55912,163 +8453,20 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg2_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -56085,375 +8483,22 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -56470,60 +8515,22 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -56540,168 +8547,22 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg3_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -56718,386 +8579,23 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -57114,62 +8612,23 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -57186,173 +8645,23 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg4_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -57369,397 +8678,24 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -57776,64 +8712,24 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -57850,178 +8746,24 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg5_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg5.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -58038,408 +8780,25 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -58456,66 +8815,25 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -58532,183 +8850,25 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg6_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg6.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -58725,11 +8885,10 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -58737,407 +8896,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -59154,11 +8921,10 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -59166,56 +8932,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -59232,11 +8957,10 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -59244,176 +8968,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg7_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg7.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv1i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv1i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -59430,171 +8993,10 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv4i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv4i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv1i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv1i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv8i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -59603,257 +9005,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv8i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv8i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv8i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv8i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv8i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv64i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv64i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv4i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv4i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv1i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv1i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv32i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -59870,70 +9030,27 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -59950,51 +9067,10 @@
   ret <vscale x 2 x float> %1
 }
 
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x float> @test_vluxseg8_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
-  ret <vscale x 2 x float> %1
-}
-
-define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -60003,49 +9079,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.nxv2f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
-  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
-  ret <vscale x 2 x float> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
 }
 
 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
@@ -60064,160 +9104,20 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -60234,194 +9134,20 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -60438,229 +9164,20 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg2_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -60677,165 +9194,22 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -60852,200 +9226,22 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -61062,236 +9258,22 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg3_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -61308,170 +9290,23 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -61488,206 +9323,23 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -61704,243 +9356,23 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg4_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg4.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -61957,175 +9389,24 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -62142,212 +9423,24 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -62364,250 +9457,24 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg5_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg5.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -62624,180 +9491,25 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -62814,218 +9526,25 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -63042,257 +9561,25 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg6_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg6.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -63309,11 +9596,10 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -63321,173 +9607,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -63504,11 +9632,10 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -63516,212 +9643,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -63738,11 +9668,10 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -63750,252 +9679,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg7_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg7.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv16i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -64012,51 +9704,10 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv16i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -64065,137 +9716,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv16i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv4i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv4i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv32i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv32i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -64212,51 +9741,10 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -64265,177 +9753,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv8i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv8i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv64i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -64452,211 +9778,10 @@
   ret <vscale x 1 x half> %1
 }
 
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv32i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv32i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv2i8(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv2i8(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv16i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv16i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv4i32(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x half> @test_vluxseg8_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
-  ret <vscale x 1 x half> %1
-}
-
-define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -64665,49 +9790,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.nxv1f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
-  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv4i32(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
-  ret <vscale x 1 x half> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
 }
 
 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
@@ -64726,160 +9815,20 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -64896,194 +9845,20 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -65100,229 +9875,20 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg2_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -65339,165 +9905,22 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -65514,200 +9937,22 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -65724,236 +9969,22 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg3_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -65970,170 +10001,23 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -66150,206 +10034,23 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -66366,243 +10067,23 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg4_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg4.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -66619,175 +10100,24 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -66804,212 +10134,24 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
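
The mask tests kept by this diff all follow one shape: the merge value %val becomes a function argument, is passed for every tied segment destination, and only the masked intrinsic is called, so the unmasked lead-in call that previously produced the merge value is no longer needed. A minimal sketch of that shape, reusing the vluxseg5.mask.nxv1f32.nxv1i16 declaration just above (the function name here is hypothetical, not a test from this patch):

; Sketch only: assumes the vluxseg5.mask.nxv1f32.nxv1i16 declaration above.
define <vscale x 1 x float> @sketch_vluxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
entry:
  ; %val is passed for all five tied segment destinations; only the masked
  ; form is called, so no unmasked vluxseg5 is needed to produce a merge value.
  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}

Reading the new CHECK lines in this shape: %val arrives in v8, is copied to v1, and is replicated into v2-v5 so the five tied destinations occupy the consecutive register group v1-v5 before the tail-undisturbed (tu) masked load writes it; field 1 (v2) is then moved back to v8 for the return.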
 
@@ -67026,250 +10168,24 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg5_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg5.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -67286,180 +10202,25 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -67476,218 +10237,25 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -67704,257 +10272,25 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg6_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg6.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -67971,11 +10307,10 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -67983,173 +10318,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -68166,11 +10343,10 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -68178,212 +10354,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -68400,11 +10379,10 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -68412,252 +10390,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg7_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg7.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv16i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv16i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
 
@@ -68674,51 +10415,10 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -68727,137 +10427,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv2i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv2i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv4i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv4i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
 
@@ -68874,51 +10452,10 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv8i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -68927,177 +10464,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv8i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv8i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv8i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv8i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv8i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv64i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv64i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv4i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv4i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
 
@@ -69114,211 +10489,10 @@
   ret <vscale x 1 x float> %1
 }
 
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv32i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv32i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv2i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv2i8(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv16i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv16i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv2i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv2i16(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x float> @test_vluxseg8_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
-  ret <vscale x 1 x float> %1
-}
-
-define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -69327,253 +10501,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.nxv1f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
-  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
-  ret <vscale x 1 x float> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv16i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv1i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv1i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv16i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv16i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv2i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv2i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv4i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv4i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv32i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv32i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv1i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv1i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
 }
 
 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
@@ -69592,22 +10526,18 @@
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
 }
 
 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
@@ -69626,22 +10556,18 @@
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
 }
 
 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
@@ -69660,541 +10586,20 @@
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv64i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv4i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv1i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv32i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv2i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv16i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv2i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg2_nxv8f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.nxv8f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv4i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv16i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv1i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv1i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv16i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv16i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv2i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv2i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv4i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv4i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv32i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv32i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv1i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv1i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -70211,23 +10616,20 @@
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
 }
 
 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
@@ -70246,23 +10648,20 @@
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
 }
 
 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
@@ -70281,557 +10680,21 @@
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv64i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv4i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv1i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv32i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv2i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv16i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv2i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg3_nxv8f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.nxv8f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv4i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv16i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv16i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv1i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv1i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv16i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv16i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv2i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv2i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv4i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv4i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv32i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv32i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv1i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv1i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -70848,24 +10711,21 @@
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
 }
 
 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
@@ -70884,24 +10744,21 @@
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
 }
 
 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
@@ -70920,552 +10777,23 @@
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv64i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vmv2r.v v18, v16
 ; CHECK-NEXT:    vmv2r.v v20, v16
 ; CHECK-NEXT:    vmv2r.v v22, v16
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v18
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv64i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv4i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
   ret <vscale x 8 x half> %1
 }
 
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv4i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv1i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv1i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv32i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv32i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv2i8(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv2i8(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv16i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv16i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv2i16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv2i16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv4i32(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x half> @test_vluxseg4_nxv8f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
-  ret <vscale x 8 x half> %1
-}
-
-define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.nxv8f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
-  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv4i32(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
-  ret <vscale x 8 x half> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv16i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv16i16(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv1i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv1i8(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv16i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv16i8(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv2i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv2i32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv4i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv4i16(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv32i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 32 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv32i16(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv1i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv1i32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
 
@@ -71482,22 +10810,18 @@
   ret <vscale x 8 x float> %1
 }
 
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
+  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
+  ret <vscale x 8 x float> %1
 }
 
 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
@@ -71516,22 +10840,18 @@
   ret <vscale x 8 x float> %1
 }
 
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
+  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
+  ret <vscale x 8 x float> %1
 }
 
 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
@@ -71550,398 +10870,20 @@
   ret <vscale x 8 x float> %1
 }
 
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv64i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
   ret <vscale x 8 x float> %1
 }
 
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv64i8(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv4i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv4i8(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv1i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv1i16(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv32i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv32i8(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv2i8(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv2i8(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv16i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 16 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv16i32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv2i16(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 2 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv2i16(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
-declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv4i32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 4 x i32>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x float> @test_vluxseg2_nxv8f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
-  ret <vscale x 8 x float> %1
-}
-
-define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv8f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.nxv8f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
-  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv4i32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
-  ret <vscale x 8 x float> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -71958,364 +10900,20 @@
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv8i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv8i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv8i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv64i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv4i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv1i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv32i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -72332,58 +10930,20 @@
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv16i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -72400,163 +10960,20 @@
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg2_nxv2f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -72573,375 +10990,22 @@
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv8i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv8i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv8i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv64i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv4i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv1i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv32i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -72958,60 +11022,22 @@
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv16i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -73028,168 +11054,22 @@
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg3_nxv2f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv16i16(double*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv16i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv16i16(double* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv16i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv1i8(double*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv1i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv1i8(double* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv1i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv16i8(double*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv16i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv16i8(double* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv16i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -73206,386 +11086,23 @@
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv4i16(double*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv4i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv4i16(double* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv4i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv32i16(double*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv32i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv32i16(double* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv32i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv1i32(double*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv1i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv1i32(double* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv1i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv8i16(double*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv8i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv8i16(double* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv8i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv8i8(double*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv8i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv8i8(double* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv8i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv8i32(double*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv8i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v12
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv8i32(double* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv8i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv64i8(double*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv64i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv64i8(double* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv64i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv4i8(double*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv4i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv4i8(double* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv4i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv1i16(double*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv1i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv1i16(double* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv1i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv32i8(double*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv32i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
-  ret <vscale x 2 x double> %1
-}
-
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv32i8(double* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv32i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -73602,62 +11119,23 @@
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv16i32(double*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv16i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv16i32(double* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv16i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -73674,198 +11152,23 @@
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv4i32(double*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv4i32(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x double> @test_vluxseg4_nxv2f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
   ret <vscale x 2 x double> %1
 }
 
-define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.nxv2f64.nxv4i32(double* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
-  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg4.mask.nxv2f64.nxv4i32(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
-  ret <vscale x 2 x double> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -73882,228 +11185,20 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -74120,194 +11215,20 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg2_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -74324,164 +11245,20 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -74498,235 +11275,22 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -74743,200 +11307,22 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg3_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -74953,169 +11339,21 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -75132,242 +11370,23 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -75384,206 +11403,23 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg4_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -75600,174 +11436,23 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -75784,249 +11469,24 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -76043,212 +11503,24 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg5_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -76265,179 +11537,24 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -76454,256 +11571,25 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -76720,218 +11606,25 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg6_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -76948,184 +11641,25 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -77142,11 +11676,10 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -77154,251 +11687,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -77415,11 +11712,10 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -77427,212 +11723,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg7_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -77649,11 +11748,10 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -77661,177 +11759,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv16i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv16i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv1i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv1i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv16i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv16i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv2i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv2i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -77848,131 +11784,10 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv32i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv32i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv1i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv1i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv8i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -77981,137 +11796,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv8i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv8i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv8i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv8i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv8i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv64i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv64i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -78128,230 +11821,27 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv1i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv1i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv32i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv32i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv2i8(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv2i8(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv16i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv16i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv2i16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x half> @test_vluxseg8_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
-  ret <vscale x 4 x half> %1
-}
-
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv2i16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -78368,130 +11858,25 @@
   ret <vscale x 4 x half> %1
 }
 
-define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v12
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v19, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v13
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
-  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
-  ret <vscale x 4 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
 }
 
 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
@@ -78510,364 +11895,20 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -78884,58 +11925,20 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -78952,163 +11955,20 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg2_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -79125,375 +11985,22 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -79510,60 +12017,22 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -79580,168 +12049,22 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg3_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -79758,386 +12081,23 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -80154,62 +12114,23 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -80226,173 +12147,23 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg4_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -80409,397 +12180,24 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -80816,64 +12214,24 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -80890,178 +12248,24 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg5_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg5.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -81078,408 +12282,25 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -81496,66 +12317,25 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -81572,183 +12352,25 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg6_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg6.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -81765,11 +12387,10 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -81777,407 +12398,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -82194,11 +12423,10 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -82206,56 +12434,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -82272,11 +12459,10 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -82284,176 +12470,15 @@
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg7ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg7_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg7.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv16i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv16i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv1i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv1i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv16i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv16i8(half* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv16i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half*, <vscale x 2 x i32>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
 
@@ -82470,171 +12495,10 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv1i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv8i16(half*, <vscale x 8 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv8i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -82643,257 +12507,15 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv8i16(half* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv8i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv8i8(half*, <vscale x 8 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv8i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv8i8(half* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv8i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv8i32(half*, <vscale x 8 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv8i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 8 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv8i32(half* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv8i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 8 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv64i8(half*, <vscale x 64 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv64i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 64 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv64i8(half* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv64i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 64 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv4i8(half*, <vscale x 4 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv4i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv4i8(half* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv4i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv1i16(half*, <vscale x 1 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv1i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv1i16(half* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv1i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv32i8(half*, <vscale x 32 x i8>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv32i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv32i8(half* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv32i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half*, <vscale x 2 x i8>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
 
@@ -82910,70 +12532,27 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v10
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i8(half* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv16i32(half*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv16i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv16i32(half* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv16i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half*, <vscale x 2 x i16>, i32)
 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
@@ -82990,51 +12569,10 @@
   ret <vscale x 2 x half> %1
 }
 
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v10, v9
-; CHECK-NEXT:    vmv1r.v v11, v9
-; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    vmv1r.v v13, v9
-; CHECK-NEXT:    vmv1r.v v14, v9
-; CHECK-NEXT:    vmv1r.v v15, v9
-; CHECK-NEXT:    vmv1r.v v16, v9
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv2i16(half* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv4i32(half*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv4i32(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vluxseg8_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
-  ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v10
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv1r.v v13, v10
@@ -83043,151 +12581,13 @@
 ; CHECK-NEXT:    vmv1r.v v16, v10
 ; CHECK-NEXT:    vmv1r.v v17, v10
 ; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.nxv2f16.nxv4i32(half* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
-  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv4i32(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
-  ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv16i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv1i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv16i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv2i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
 }
 
 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
@@ -83206,228 +12606,20 @@
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv32i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv1i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv8i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv8i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv8i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv64i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -83444,194 +12636,20 @@
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv1i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv1i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv32i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv2i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv16i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg2_nxv4f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv2i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -83648,164 +12666,20 @@
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg2_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
+; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
+; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv16i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv1i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv16i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv2i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -83822,235 +12696,22 @@
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv32i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv1i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv8i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv8i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv8i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv64i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -84067,200 +12728,22 @@
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv1i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv1i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv32i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv2i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv16i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg3_nxv4f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT:    vmv2r.v v4, v2
-; CHECK-NEXT:    vmv2r.v v6, v2
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv2i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -84277,169 +12760,22 @@
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8
+; CHECK-NEXT:    vmv2r.v v2, v8
 ; CHECK-NEXT:    vmv2r.v v4, v2
 ; CHECK-NEXT:    vmv2r.v v6, v2
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg3ei32.v v2, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv16i16(float*, <vscale x 16 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv16i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv16i16(float* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv16i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv1i8(float*, <vscale x 1 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv1i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv1i8(float* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv1i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv16i8(float*, <vscale x 16 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv16i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv16i8(float* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv16i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv2i32(float*, <vscale x 2 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv2i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv2i32(float* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv2i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float*, <vscale x 4 x i16>, i32)
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
 
@@ -84456,242 +12792,23 @@
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv32i16(float*, <vscale x 32 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv32i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv32i16(float* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv32i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv1i32(float*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv1i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv1i32(float* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv1i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv8i16(float*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv8i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv8i16(float* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv8i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv8i8(float*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv8i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv8i8(float* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv8i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv8i32(float*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv8i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v12
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv8i32(float* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv8i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv64i8(float*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv64i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv64i8(float* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv64i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 64 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float*, <vscale x 4 x i8>, i32)
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
@@ -84708,206 +12825,23 @@
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv1i16(float*, <vscale x 1 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv1i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 1 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv1i16(float* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv1i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 1 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv32i8(float*, <vscale x 32 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv32i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 32 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v12
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v8, v0.t
+; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv32i8(float* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv32i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 32 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv2i8(float*, <vscale x 2 x i8>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv2i8(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv2i8(float* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv2i8(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv16i32(float*, <vscale x 16 x i32>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv16i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 16 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv16i32(float* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv16i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 16 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv2i16(float*, <vscale x 2 x i16>, i32)
-declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv2i16(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 2 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x float> @test_vluxseg4_nxv4f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
-  ret <vscale x 4 x float> %1
-}
-
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv2i16(float* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv2i16(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 2 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
-}
-
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float*, <vscale x 4 x i32>, i32)
 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 
@@ -84924,23 +12858,20 @@
   ret <vscale x 4 x float> %1
 }
 
-define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
+define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv2r.v v12, v10
-; CHECK-NEXT:    vmv2r.v v14, v10
-; CHECK-NEXT:    vmv2r.v v16, v10
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vmv2r.v v14, v12
+; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v18, v12
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v14
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
-  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
-  ret <vscale x 4 x float> %3
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
 }